ARM: tegra: create common XUSB padctl driver file

A fair amount of the XUSB padctl driver will be common between Tegra124
and Tegra210. To avoid cut/paste between the two chips, create a new
file that will contain the common code, and convert the Tegra124 code to
use it. This change doesn't move every last piece of code that can/will be
shared, but rather concentrates on moving code that can be moved with zero
changes, so there are no other diffs mixed in.

Signed-off-by: Stephen Warren <swarren@nvidia.com>
Reviewed-by: Simon Glass <sjg@chromium.org>
Signed-off-by: Tom Warren <twarren@nvidia.com>
commit 1680d7b6de (parent 057fd32ffc)
 arch/arm/mach-tegra/tegra124/Makefile      |   1
 arch/arm/mach-tegra/tegra124/xusb-padctl.c | 346
 arch/arm/mach-tegra/xusb-padctl-common.c   | 305
 arch/arm/mach-tegra/xusb-padctl-common.h   | 103
 4 files changed
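As the commit message says, the chip-specific file keeps its lane tables, function lists and PHY ops, while the table-driven DT parsing and pinmux application move into xusb-padctl-common.c. For orientation, here is a minimal sketch (not part of this commit) of what a later Tegra210 counterpart could look like against the shared header; the function names in the tables, the register values and the COMPAT_NVIDIA_TEGRA210_XUSB_PADCTL compatible ID are placeholders and assumptions, not real Tegra210 data.

/*
 * Illustration only, not part of this commit: a hypothetical
 * arch/arm/mach-tegra/tegra210/xusb-padctl.c that keeps chip-specific
 * tables and PHY ops but reuses the shared helpers. Offsets, function
 * names and the compatible ID below are placeholders.
 */
#include <common.h>
#include <fdtdec.h>

#include "../xusb-padctl-common.h"

static const char *const tegra210_functions[] = {
	"snps", "xusb", "uart", "pcie", "sata",
};

static const unsigned int tegra210_usb_functions[] = { 0, 1, 2 };

static const struct tegra_xusb_padctl_lane tegra210_lanes[] = {
	{
		.name = "otg-0",
		.offset = 0x004,	/* placeholder register layout */
		.shift = 0,
		.mask = 0x3,
		.iddq = 0,
		.funcs = tegra210_usb_functions,
		.num_funcs = ARRAY_SIZE(tegra210_usb_functions),
	},
};

/* The chip file still provides the instance the common header refers to. */
struct tegra_xusb_padctl *padctl = &(struct tegra_xusb_padctl) {
	.enable = 0,	/* chip-specific .phys[].ops would be filled in here */
};

void tegra_xusb_padctl_init(const void *fdt)
{
	int node;

	/* assumed fdtdec compatible ID, mirroring the Tegra124 lookup */
	node = fdtdec_next_compatible(fdt, 0,
				      COMPAT_NVIDIA_TEGRA210_XUSB_PADCTL);
	if (node < 0)
		return;

	padctl->lanes = tegra210_lanes;
	padctl->num_lanes = ARRAY_SIZE(tegra210_lanes);
	padctl->functions = tegra210_functions;
	padctl->num_functions = ARRAY_SIZE(tegra210_functions);

	if (!tegra_xusb_padctl_parse_dt(padctl, fdt, node))
		tegra_xusb_padctl_config_apply(padctl, &padctl->config);
}

The Tegra124 conversion below follows the same shape: everything that could move verbatim went to the common files, and the rest stayed put.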

arch/arm/mach-tegra/tegra124/Makefile:
@@ -11,6 +11,7 @@ obj-y += clock.o
obj-y += funcmux.o
obj-y += pinmux.o
obj-y += xusb-padctl.o
obj-y += ../xusb-padctl-common.o
ifndef CONFIG_SPL_BUILD
obj-$(CONFIG_ARMV7_NONSEC) += psci.o

arch/arm/mach-tegra/tegra124/xusb-padctl.c:
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: GPL-2.0
*/
@@ -11,6 +11,8 @@
#include <fdtdec.h>
#include <malloc.h>
#include "../xusb-padctl-common.h"
#include <asm/io.h>
#include <asm/arch/clock.h>
@@ -83,18 +85,6 @@ static const unsigned int tegra124_pci_functions[] = {
TEGRA124_FUNC_RSVD,
};
struct tegra_xusb_padctl_lane {
const char *name;
unsigned int offset;
unsigned int shift;
unsigned int mask;
unsigned int iddq;
const unsigned int *funcs;
unsigned int num_funcs;
};
#define TEGRA124_LANE(_name, _offset, _shift, _mask, _iddq, _funcs) \
{ \
.name = _name, \
@@ -121,74 +111,6 @@ static const struct tegra_xusb_padctl_lane tegra124_lanes[] = {
TEGRA124_LANE("sata-0", 0x134, 26, 0x3, 6, pci),
};
struct tegra_xusb_phy_ops {
int (*prepare)(struct tegra_xusb_phy *phy);
int (*enable)(struct tegra_xusb_phy *phy);
int (*disable)(struct tegra_xusb_phy *phy);
int (*unprepare)(struct tegra_xusb_phy *phy);
};
struct tegra_xusb_phy {
const struct tegra_xusb_phy_ops *ops;
struct tegra_xusb_padctl *padctl;
};
struct tegra_xusb_padctl_pin {
const struct tegra_xusb_padctl_lane *lane;
unsigned int func;
int iddq;
};
#define MAX_GROUPS 3
#define MAX_PINS 6
struct tegra_xusb_padctl_group {
const char *name;
const char *pins[MAX_PINS];
unsigned int num_pins;
const char *func;
int iddq;
};
struct tegra_xusb_padctl_config {
const char *name;
struct tegra_xusb_padctl_group groups[MAX_GROUPS];
unsigned int num_groups;
};
struct tegra_xusb_padctl {
struct fdt_resource regs;
unsigned int enable;
struct tegra_xusb_phy phys[2];
const struct tegra_xusb_padctl_lane *lanes;
unsigned int num_lanes;
const char *const *functions;
unsigned int num_functions;
struct tegra_xusb_padctl_config config;
};
static inline u32 padctl_readl(struct tegra_xusb_padctl *padctl,
unsigned long offset)
{
return readl(padctl->regs.start + offset);
}
static inline void padctl_writel(struct tegra_xusb_padctl *padctl,
u32 value, unsigned long offset)
{
writel(value, padctl->regs.start + offset);
}
static int tegra_xusb_padctl_enable(struct tegra_xusb_padctl *padctl)
{
u32 value;
@@ -380,7 +302,7 @@ static const struct tegra_xusb_phy_ops sata_phy_ops = {
.unprepare = phy_unprepare,
};
static struct tegra_xusb_padctl *padctl = &(struct tegra_xusb_padctl) {
struct tegra_xusb_padctl *padctl = &(struct tegra_xusb_padctl) {
.phys = {
[0] = {
.ops = &pcie_phy_ops,
@@ -391,214 +313,7 @@ static struct tegra_xusb_padctl *padctl = &(struct tegra_xusb_padctl) {
},
};
static const struct tegra_xusb_padctl_lane *
tegra_xusb_padctl_find_lane(struct tegra_xusb_padctl *padctl, const char *name)
{
unsigned int i;
for (i = 0; i < padctl->num_lanes; i++)
if (strcmp(name, padctl->lanes[i].name) == 0)
return &padctl->lanes[i];
return NULL;
}
static int
tegra_xusb_padctl_group_parse_dt(struct tegra_xusb_padctl *padctl,
struct tegra_xusb_padctl_group *group,
const void *fdt, int node)
{
unsigned int i;
int len, err;
group->name = fdt_get_name(fdt, node, &len);
len = fdt_count_strings(fdt, node, "nvidia,lanes");
if (len < 0) {
error("failed to parse \"nvidia,lanes\" property");
return -EINVAL;
}
group->num_pins = len;
for (i = 0; i < group->num_pins; i++) {
err = fdt_get_string_index(fdt, node, "nvidia,lanes", i,
&group->pins[i]);
if (err < 0) {
error("failed to read string from \"nvidia,lanes\" property");
return -EINVAL;
}
}
group->num_pins = len;
err = fdt_get_string(fdt, node, "nvidia,function", &group->func);
if (err < 0) {
error("failed to parse \"nvidia,func\" property");
return -EINVAL;
}
group->iddq = fdtdec_get_int(fdt, node, "nvidia,iddq", -1);
return 0;
}
static int tegra_xusb_padctl_find_function(struct tegra_xusb_padctl *padctl,
const char *name)
{
unsigned int i;
for (i = 0; i < padctl->num_functions; i++)
if (strcmp(name, padctl->functions[i]) == 0)
return i;
return -ENOENT;
}
static int
tegra_xusb_padctl_lane_find_function(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_padctl_lane *lane,
const char *name)
{
unsigned int i;
int func;
func = tegra_xusb_padctl_find_function(padctl, name);
if (func < 0)
return func;
for (i = 0; i < lane->num_funcs; i++)
if (lane->funcs[i] == func)
return i;
return -ENOENT;
}
static int
tegra_xusb_padctl_group_apply(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_padctl_group *group)
{
unsigned int i;
for (i = 0; i < group->num_pins; i++) {
const struct tegra_xusb_padctl_lane *lane;
unsigned int func;
u32 value;
lane = tegra_xusb_padctl_find_lane(padctl, group->pins[i]);
if (!lane) {
error("no lane for pin %s", group->pins[i]);
continue;
}
func = tegra_xusb_padctl_lane_find_function(padctl, lane,
group->func);
if (func < 0) {
error("function %s invalid for lane %s: %d",
group->func, lane->name, func);
continue;
}
value = padctl_readl(padctl, lane->offset);
/* set pin function */
value &= ~(lane->mask << lane->shift);
value |= func << lane->shift;
/*
* Set IDDQ if supported on the lane and specified in the
* configuration.
*/
if (lane->iddq > 0 && group->iddq >= 0) {
if (group->iddq != 0)
value &= ~(1 << lane->iddq);
else
value |= 1 << lane->iddq;
}
padctl_writel(padctl, value, lane->offset);
}
return 0;
}
static int
tegra_xusb_padctl_config_apply(struct tegra_xusb_padctl *padctl,
struct tegra_xusb_padctl_config *config)
{
unsigned int i;
for (i = 0; i < config->num_groups; i++) {
const struct tegra_xusb_padctl_group *group;
int err;
group = &config->groups[i];
err = tegra_xusb_padctl_group_apply(padctl, group);
if (err < 0) {
error("failed to apply group %s: %d", group->name, err);
continue;
}
}
return 0;
}
static int
tegra_xusb_padctl_config_parse_dt(struct tegra_xusb_padctl *padctl,
struct tegra_xusb_padctl_config *config,
const void *fdt, int node)
{
int subnode;
config->name = fdt_get_name(fdt, node, NULL);
fdt_for_each_subnode(fdt, subnode, node) {
struct tegra_xusb_padctl_group *group;
int err;
group = &config->groups[config->num_groups];
err = tegra_xusb_padctl_group_parse_dt(padctl, group, fdt,
subnode);
if (err < 0) {
error("failed to parse group %s", group->name);
return err;
}
config->num_groups++;
}
return 0;
}
static int tegra_xusb_padctl_parse_dt(struct tegra_xusb_padctl *padctl,
const void *fdt, int node)
{
int subnode, err;
err = fdt_get_resource(fdt, node, "reg", 0, &padctl->regs);
if (err < 0) {
error("registers not found");
return err;
}
fdt_for_each_subnode(fdt, subnode, node) {
struct tegra_xusb_padctl_config *config = &padctl->config;
err = tegra_xusb_padctl_config_parse_dt(padctl, config, fdt,
subnode);
if (err < 0) {
error("failed to parse entry %s: %d", config->name,
err);
continue;
}
}
return 0;
}
static int process_nodes(const void *fdt, int nodes[], unsigned int count)
int process_nodes(const void *fdt, int nodes[], unsigned int count)
{
unsigned int i;
@@ -648,57 +363,6 @@ static int process_nodes(const void *fdt, int nodes[], unsigned int count)
return 0;
}
struct tegra_xusb_phy *tegra_xusb_phy_get(unsigned int type)
{
struct tegra_xusb_phy *phy = NULL;
switch (type) {
case TEGRA_XUSB_PADCTL_PCIE:
phy = &padctl->phys[0];
phy->padctl = padctl;
break;
case TEGRA_XUSB_PADCTL_SATA:
phy = &padctl->phys[1];
phy->padctl = padctl;
break;
}
return phy;
}
int tegra_xusb_phy_prepare(struct tegra_xusb_phy *phy)
{
if (phy && phy->ops && phy->ops->prepare)
return phy->ops->prepare(phy);
return phy ? -ENOSYS : -EINVAL;
}
int tegra_xusb_phy_enable(struct tegra_xusb_phy *phy)
{
if (phy && phy->ops && phy->ops->enable)
return phy->ops->enable(phy);
return phy ? -ENOSYS : -EINVAL;
}
int tegra_xusb_phy_disable(struct tegra_xusb_phy *phy)
{
if (phy && phy->ops && phy->ops->disable)
return phy->ops->disable(phy);
return phy ? -ENOSYS : -EINVAL;
}
int tegra_xusb_phy_unprepare(struct tegra_xusb_phy *phy)
{
if (phy && phy->ops && phy->ops->unprepare)
return phy->ops->unprepare(phy);
return phy ? -ENOSYS : -EINVAL;
}
void tegra_xusb_padctl_init(const void *fdt)
{
int count, nodes[1];

arch/arm/mach-tegra/xusb-padctl-common.c (new file):
@@ -0,0 +1,305 @@
/*
* Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: GPL-2.0
*/
#define pr_fmt(fmt) "tegra-xusb-padctl: " fmt
#include <common.h>
#include <errno.h>
#include "xusb-padctl-common.h"
#include <asm/arch/clock.h>
int tegra_xusb_phy_prepare(struct tegra_xusb_phy *phy)
{
if (phy && phy->ops && phy->ops->prepare)
return phy->ops->prepare(phy);
return phy ? -ENOSYS : -EINVAL;
}
int tegra_xusb_phy_enable(struct tegra_xusb_phy *phy)
{
if (phy && phy->ops && phy->ops->enable)
return phy->ops->enable(phy);
return phy ? -ENOSYS : -EINVAL;
}
int tegra_xusb_phy_disable(struct tegra_xusb_phy *phy)
{
if (phy && phy->ops && phy->ops->disable)
return phy->ops->disable(phy);
return phy ? -ENOSYS : -EINVAL;
}
int tegra_xusb_phy_unprepare(struct tegra_xusb_phy *phy)
{
if (phy && phy->ops && phy->ops->unprepare)
return phy->ops->unprepare(phy);
return phy ? -ENOSYS : -EINVAL;
}
struct tegra_xusb_phy *tegra_xusb_phy_get(unsigned int type)
{
struct tegra_xusb_phy *phy;
int i;
for (i = 0; i < padctl.socdata->num_phys; i++) {
phy = &padctl.socdata->phys[i];
if (phy->type != type)
continue;
return phy;
}
return NULL;
}
static const struct tegra_xusb_padctl_lane *
tegra_xusb_padctl_find_lane(struct tegra_xusb_padctl *padctl, const char *name)
{
unsigned int i;
for (i = 0; i < padctl->socdata->num_lanes; i++)
if (strcmp(name, padctl->socdata->lanes[i].name) == 0)
return &padctl->socdata->lanes[i];
return NULL;
}
static int
tegra_xusb_padctl_group_parse_dt(struct tegra_xusb_padctl *padctl,
struct tegra_xusb_padctl_group *group,
const void *fdt, int node)
{
unsigned int i;
int len, err;
group->name = fdt_get_name(fdt, node, &len);
len = fdt_count_strings(fdt, node, "nvidia,lanes");
if (len < 0) {
error("failed to parse \"nvidia,lanes\" property");
return -EINVAL;
}
group->num_pins = len;
for (i = 0; i < group->num_pins; i++) {
err = fdt_get_string_index(fdt, node, "nvidia,lanes", i,
&group->pins[i]);
if (err < 0) {
error("failed to read string from \"nvidia,lanes\" property");
return -EINVAL;
}
}
group->num_pins = len;
err = fdt_get_string(fdt, node, "nvidia,function", &group->func);
if (err < 0) {
error("failed to parse \"nvidia,func\" property");
return -EINVAL;
}
group->iddq = fdtdec_get_int(fdt, node, "nvidia,iddq", -1);
return 0;
}
static int tegra_xusb_padctl_find_function(struct tegra_xusb_padctl *padctl,
const char *name)
{
unsigned int i;
for (i = 0; i < padctl->socdata->num_functions; i++)
if (strcmp(name, padctl->socdata->functions[i]) == 0)
return i;
return -ENOENT;
}
static int
tegra_xusb_padctl_lane_find_function(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_padctl_lane *lane,
const char *name)
{
unsigned int i;
int func;
func = tegra_xusb_padctl_find_function(padctl, name);
if (func < 0)
return func;
for (i = 0; i < lane->num_funcs; i++)
if (lane->funcs[i] == func)
return i;
return -ENOENT;
}
static int
tegra_xusb_padctl_group_apply(struct tegra_xusb_padctl *padctl,
const struct tegra_xusb_padctl_group *group)
{
unsigned int i;
for (i = 0; i < group->num_pins; i++) {
const struct tegra_xusb_padctl_lane *lane;
unsigned int func;
u32 value;
lane = tegra_xusb_padctl_find_lane(padctl, group->pins[i]);
if (!lane) {
error("no lane for pin %s", group->pins[i]);
continue;
}
func = tegra_xusb_padctl_lane_find_function(padctl, lane,
group->func);
if (func < 0) {
error("function %s invalid for lane %s: %d",
group->func, lane->name, func);
continue;
}
value = padctl_readl(padctl, lane->offset);
/* set pin function */
value &= ~(lane->mask << lane->shift);
value |= func << lane->shift;
/*
* Set IDDQ if supported on the lane and specified in the
* configuration.
*/
if (lane->iddq > 0 && group->iddq >= 0) {
if (group->iddq != 0)
value &= ~(1 << lane->iddq);
else
value |= 1 << lane->iddq;
}
padctl_writel(padctl, value, lane->offset);
}
return 0;
}
static int
tegra_xusb_padctl_config_apply(struct tegra_xusb_padctl *padctl,
struct tegra_xusb_padctl_config *config)
{
unsigned int i;
for (i = 0; i < config->num_groups; i++) {
const struct tegra_xusb_padctl_group *group;
int err;
group = &config->groups[i];
err = tegra_xusb_padctl_group_apply(padctl, group);
if (err < 0) {
error("failed to apply group %s: %d",
group->name, err);
continue;
}
}
return 0;
}
static int
tegra_xusb_padctl_config_parse_dt(struct tegra_xusb_padctl *padctl,
struct tegra_xusb_padctl_config *config,
const void *fdt, int node)
{
int subnode;
config->name = fdt_get_name(fdt, node, NULL);
fdt_for_each_subnode(fdt, subnode, node) {
struct tegra_xusb_padctl_group *group;
int err;
group = &config->groups[config->num_groups];
err = tegra_xusb_padctl_group_parse_dt(padctl, group, fdt,
subnode);
if (err < 0) {
error("failed to parse group %s", group->name);
return err;
}
config->num_groups++;
}
return 0;
}
static int tegra_xusb_padctl_parse_dt(struct tegra_xusb_padctl *padctl,
const void *fdt, int node)
{
int subnode, err;
err = fdt_get_resource(fdt, node, "reg", 0, &padctl->regs);
if (err < 0) {
error("registers not found");
return err;
}
fdt_for_each_subnode(fdt, subnode, node) {
struct tegra_xusb_padctl_config *config = &padctl->config;
err = tegra_xusb_padctl_config_parse_dt(padctl, config, fdt,
subnode);
if (err < 0) {
error("failed to parse entry %s: %d",
config->name, err);
continue;
}
}
return 0;
}
struct tegra_xusb_padctl padctl;
int tegra_xusb_process_nodes(const void *fdt, int nodes[], unsigned int count,
const struct tegra_xusb_padctl_soc *socdata)
{
unsigned int i;
int err;
for (i = 0; i < count; i++) {
if (!fdtdec_get_is_enabled(fdt, nodes[i]))
continue;
padctl.socdata = socdata;
err = tegra_xusb_padctl_parse_dt(&padctl, fdt, nodes[i]);
if (err < 0) {
error("failed to parse DT: %d", err);
continue;
}
/* deassert XUSB padctl reset */
reset_set_enable(PERIPH_ID_XUSB_PADCTL, 0);
err = tegra_xusb_padctl_config_apply(&padctl, &padctl.config);
if (err < 0) {
error("failed to apply pinmux: %d", err);
continue;
}
/* only a single instance is supported */
break;
}
return 0;
}

arch/arm/mach-tegra/xusb-padctl-common.h (new file):
@@ -0,0 +1,103 @@
/*
* Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _TEGRA_XUSB_PADCTL_COMMON_H_
#define _TEGRA_XUSB_PADCTL_COMMON_H_
#include <common.h>
#include <fdtdec.h>
#include <asm/io.h>
#include <asm/arch-tegra/xusb-padctl.h>
struct tegra_xusb_padctl_lane {
const char *name;
unsigned int offset;
unsigned int shift;
unsigned int mask;
unsigned int iddq;
const unsigned int *funcs;
unsigned int num_funcs;
};
struct tegra_xusb_phy_ops {
int (*prepare)(struct tegra_xusb_phy *phy);
int (*enable)(struct tegra_xusb_phy *phy);
int (*disable)(struct tegra_xusb_phy *phy);
int (*unprepare)(struct tegra_xusb_phy *phy);
};
struct tegra_xusb_phy {
const struct tegra_xusb_phy_ops *ops;
struct tegra_xusb_padctl *padctl;
};
struct tegra_xusb_padctl_pin {
const struct tegra_xusb_padctl_lane *lane;
unsigned int func;
int iddq;
};
#define MAX_GROUPS 3
#define MAX_PINS 6
struct tegra_xusb_padctl_group {
const char *name;
const char *pins[MAX_PINS];
unsigned int num_pins;
const char *func;
int iddq;
};
struct tegra_xusb_padctl_config {
const char *name;
struct tegra_xusb_padctl_group groups[MAX_GROUPS];
unsigned int num_groups;
};
struct tegra_xusb_padctl {
struct fdt_resource regs;
unsigned int enable;
struct tegra_xusb_phy phys[2];
const struct tegra_xusb_padctl_lane *lanes;
unsigned int num_lanes;
const char *const *functions;
unsigned int num_functions;
struct tegra_xusb_padctl_config config;
};
static inline u32 padctl_readl(struct tegra_xusb_padctl *padctl,
unsigned long offset)
{
return readl(padctl->regs.start + offset);
}
static inline void padctl_writel(struct tegra_xusb_padctl *padctl,
u32 value, unsigned long offset)
{
writel(value, padctl->regs.start + offset);
}
extern struct tegra_xusb_padctl *padctl;
int tegra_xusb_padctl_parse_dt(struct tegra_xusb_padctl *padctl,
const void *fdt, int node);
int tegra_xusb_padctl_config_apply(struct tegra_xusb_padctl *padctl,
struct tegra_xusb_padctl_config *config);
#endif
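For completeness, a short sketch of how a consumer (for example, XHCI glue code) would drive the PHY accessors that this series keeps exporting via asm/arch-tegra/xusb-padctl.h; the wrapper functions and error handling below are illustrative, and only the tegra_xusb_phy_* calls and the TEGRA_XUSB_PADCTL_PCIE selector come from the code above.

/*
 * Illustration only: a hypothetical consumer bringing a super-speed PHY
 * up and down through the accessors defined in this series.
 */
#include <common.h>
#include <errno.h>

#include <asm/arch-tegra/xusb-padctl.h>

static struct tegra_xusb_phy *usb3_phy;

static int example_usb3_phy_up(void)
{
	int err;

	/* look up the PCIe-brick PHY used for USB3 super-speed lanes */
	usb3_phy = tegra_xusb_phy_get(TEGRA_XUSB_PADCTL_PCIE);
	if (!usb3_phy)
		return -ENODEV;

	err = tegra_xusb_phy_prepare(usb3_phy);
	if (err)
		return err;

	err = tegra_xusb_phy_enable(usb3_phy);
	if (err)
		tegra_xusb_phy_unprepare(usb3_phy);

	return err;
}

static void example_usb3_phy_down(void)
{
	if (!usb3_phy)
		return;

	tegra_xusb_phy_disable(usb3_phy);
	tegra_xusb_phy_unprepare(usb3_phy);
	usb3_phy = NULL;
}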